library(tidyverse) # data manipulation
library(ggpubr) # producing data exploratory plots
library(modelsummary) # descriptive data
library(glmmTMB) # running generalised mixed models
library(DHARMa) # model diagnostics
library(performance) # model diagnostics
library(ggeffects) # partial effect plots
library(car) # running Anova on model
library(emmeans) # post-hoc analysis
# NOTE(review): mangled chunk boundary below — library(MuMIn) (model selection)
# and the egg-data import were fused onto one line during export.
# Egg data: first 14 columns treated as categorical IDs; NOTES, the unnamed
# 18th column and IMAGE are dropped.
library(MuMIn)egg <- read_csv("import_data/egg_size_data_2022_2023.csv") |>
mutate(across(1:14,factor)) |>
select(!c(NOTES,...18, IMAGE))
# 1-month offspring size data: factor-encode the first 15 (ID) columns, rename
# LENGTH to STANDARD_LENGTH, keep the ID + measurement columns, and record
# DENSITY = number of fish measured per clutch.
m1 <- read_csv("import_data/1_month_size_data_2022_2023.csv") |>
  mutate(across(1:15, factor)) |>
  mutate(STANDARD_LENGTH = LENGTH, .keep = "unused") |>
  select(!NOTES) |>
  select(1:15, STANDARD_LENGTH, MASS) |>
  add_count(CLUTCH_NUMBER, name = "DENSITY")
# 2-month offspring size data, processed identically to the 1-month set.
m2 <- read_csv("import_data/2_month_size_data_2022_2023.csv") |>
  mutate(across(1:15, factor)) |>
  mutate(STANDARD_LENGTH = LENGTH, .keep = "unused") |>
  select(!NOTES) |>
  select(1:15, STANDARD_LENGTH, MASS) |>
  add_count(CLUTCH_NUMBER, name = "DENSITY")
# 2.5-month offspring size data, processed identically to the 1-/2-month sets.
m2.5 <- read_csv("import_data/2-5_month_size_data_2022_2023.csv") |>
  mutate(across(1:15, factor)) |>
  mutate(STANDARD_LENGTH = LENGTH, .keep = "unused") |>
  select(!NOTES) |>
  select(1:15, STANDARD_LENGTH, MASS) |>
  add_count(CLUTCH_NUMBER, name = "DENSITY")
# Adult (parental) sizes. Each adult appears under both a MALE and a FEMALE
# alias so it can be joined to offspring tables by either parental role.
# POPULATION is decoded from characters 2-4 of the fish ID.
adult <- read_csv("import_data/adult_size_2022_2023.csv") |>
mutate(across(1:3,factor),
MALE = FISH_ID,
FEMALE = FISH_ID,
POPULATION = str_sub(FISH_ID, 2,4),
POPULATION = case_when(POPULATION == "ARL" ~ "Arlington Reef",
POPULATION == "SUD" ~ "Sudbury Reef",
POPULATION == "VLA" ~ "Vlassof cay",
POPULATION == "PRE" ~ "Pretty patches",
TRUE ~ POPULATION)) |>
# Recover each adult's experimental TEMPERATURE from the 2.5-month offspring
# table, matching by male then by female ID, and keep whichever matched.
left_join(select(m2.5, c("MALE","TEMPERATURE")),
by="MALE") |>
left_join(select(m2.5, c("FEMALE","TEMPERATURE")),
by="FEMALE") |>
distinct() |>
mutate(TEMPERATURE = coalesce(TEMPERATURE.x, TEMPERATURE.y)) |>
drop_na(TEMPERATURE) |>
# NOTE(review): mangled chunk boundary — the final select() and the start of
# the m1_df_all pipeline below were fused onto one line during export.
# m1_df_all: attach parental SL/MASS (by male then female) and mid-parent values.
select(-c("TEMPERATURE.x","TEMPERATURE.y")) m1_df_all <- m1 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
# Collapse m1_df_all to one row per clutch: per-clutch median offspring SL,
# then drop fish-level columns so distinct() keeps a single clutch row.
m1_df <- m1_df_all |>
group_by(CLUTCH_NUMBER) |>
mutate(MEDIAN_STANDARD_LENGTH = median(STANDARD_LENGTH)) |>
drop_na(MEDIAN_STANDARD_LENGTH) |>
ungroup() |>
select(-c("STANDARD_LENGTH","MASS.x", "SAMPLE_NO")) |>
distinct() |>
# Fall back to the male-only value where the midpoint is NA (presumably the
# female parent was unmeasured — TODO confirm).
mutate(MASS_MIDPOINT =coalesce(MASS_MIDPOINT, MASS_MALE),
SL_MIDPOINT =coalesce(SL_MIDPOINT, SL_MALE))
# Attach parental sizes to the 2-month offspring data: join adult SL/MASS by
# male then by female ID, then compute mid-parent values.
# (MASS.y: the offspring table already has a MASS column, so the adult mass
# gets the ".y" suffix on the first join.)
m2_df_all <- m2 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
# One row per clutch for the 2-month data (same collapse as m1_df).
m2_df <- m2_df_all |>
group_by(CLUTCH_NUMBER) |>
mutate(MEDIAN_STANDARD_LENGTH = median(STANDARD_LENGTH)) |>
drop_na(MEDIAN_STANDARD_LENGTH) |>
ungroup() |>
select(-c("STANDARD_LENGTH","MASS.x", "SAMPLE_NO")) |>
distinct() |>
# Male-only fallback where the mid-parent value is missing.
mutate(MASS_MIDPOINT =coalesce(MASS_MIDPOINT, MASS_MALE),
SL_MIDPOINT =coalesce(SL_MIDPOINT, SL_MALE))
# Attach parental sizes to the 2.5-month offspring data (same joins as
# m1_df_all / m2_df_all).
m2.5_df_all <- m2.5 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
# One row per clutch for the 2.5-month data.
# NOTE(review): unlike m1_df/m2_df, SAMPLE_NO is not dropped here before
# distinct() — confirm the 2.5-month data has no SAMPLE_NO column, otherwise
# multiple rows per clutch would survive.
m2.5_df <- m2.5_df_all |>
group_by(CLUTCH_NUMBER) |>
mutate(MEDIAN_STANDARD_LENGTH = median(STANDARD_LENGTH)) |>
drop_na(MEDIAN_STANDARD_LENGTH) |>
ungroup() |>
select(-c("STANDARD_LENGTH","MASS.x")) |>
distinct() |>
mutate(MASS_MIDPOINT =coalesce(MASS_MIDPOINT, MASS_MALE),
SL_MIDPOINT =coalesce(SL_MIDPOINT, SL_MALE))
# Clutch IDs present in the 2.5-month dataset; used below to restrict the egg
# data to clutches that survived the full experiment.
clutches <- select(m2.5_df, CLUTCH_NUMBER)
# Attach parental sizes to the egg data.
# NOTE(review): here the first join keeps plain MASS (not MASS.y) — presumably
# the egg table has no MASS column of its own, so there is no suffix clash;
# confirm against the CSV.
egg_df_all <- egg |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
# One row per clutch for the egg data: per-clutch median egg size, drop the
# egg-level columns, then fall back to male values where parental data is
# incomplete.
egg_df <- egg_df_all |>
group_by(CLUTCH_NUMBER) |>
mutate(MEDIAN_EGG_SIZE = median(EGG_SIZE)) |>
drop_na(MEDIAN_EGG_SIZE) |>
ungroup() |>
select(-c("EGG_SIZE","SAMPLE")) |>
distinct() |>
mutate(MASS_MIDPOINT =coalesce(MASS_MIDPOINT, MASS_MALE),
SL_MIDPOINT =coalesce(SL_MIDPOINT, SL_MALE),
SL_FEMALE =coalesce(SL_FEMALE, SL_MALE)) # done for two individuals
# Restrict the egg data to clutches that also appear in the 2.5-month dataset.
egg_df <- clutches |>
# NOTE(review): mangled chunk boundary — the inner_join() call and the plot1
# definition (1-month offspring SL vs male SL scatter, coloured by
# temperature) were fused onto one line during export.
inner_join(egg_df, by="CLUTCH_NUMBER")plot1 <- ggplot(m1_df, aes(x=SL_MALE, y=MEDIAN_STANDARD_LENGTH, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
theme_classic()
# Exploratory plot: 1-month offspring SL vs maternal SL, by temperature.
plot2 <- ggplot(m1_df, aes(x = SL_FEMALE, y = MEDIAN_STANDARD_LENGTH, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  theme_classic()
# Exploratory plot: 1-month offspring SL vs mid-parent SL, by temperature.
plot3 <- ggplot(m1_df, aes(x = SL_MIDPOINT, y = MEDIAN_STANDARD_LENGTH, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  theme_classic()
# Arrange the three parent-offspring panels side by side with a shared legend
# (call closes on the next, export-mangled line).
ggarrange(plot1, plot2, plot3,
nrow =1,
ncol =3,
common.legend = TRUE)| POPULATION | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington Reef | 8 | 10 | 8 |
| Pretty patches | 4 | 6 | 6 |
| Sudbury Reef | 4 | 4 | 2 |
| Vlassof cay | 6 | 2 | 6 |
| POPULATION | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington Reef | 8 | 4 | 7 |
| Pretty Patches | 4 | 3 | 4 |
| Sudbury Reef | 4 | 2 | 2 |
| Vlassof Cay | 4 | 2 | 4 |
| POPULATION | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington Reef | 8 | 4 | 6 |
| Pretty Patches | 4 | 3 | 5 |
| Sudbury Reef | 4 | 2 | 1 |
| Vlassof Cay | 3 | 3 | 4 |
| POPULATION | 27 | 28.5 | 30 |
|---|---|---|---|
| Arlington Reef | 7 | 6 | 8 |
| Pretty Patches | 4 | 3 | 4 |
| Sudbury Reef | 3 | 2 | 2 |
| Vlassof Cay | 3 | 2 | 4 |
# Descriptive summary (modelsummary::datasummary) of adult SL by temperature.
datasummary(Factor(TEMPERATURE) ~ SL * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(adult, SL),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 21 | 97.14 | 100.60 | 84.76 | 104.42 | 7.00 | ▃▂▁▁▂▁▆▇ |
| 28.5 | 22 | 91.24 | 93.83 | 72.47 | 100.00 | 7.73 | ▁▂▂▃▆▇▃ |
| 30 | 22 | 92.52 | 93.22 | 80.34 | 101.22 | 6.44 | ▂▅▂▃▂▃▇▂▇▅ |
# Same descriptive summary for 1-month median offspring SL.
datasummary(Factor(TEMPERATURE) ~ MEDIAN_STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m1_df, MEDIAN_STANDARD_LENGTH),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 20 | 12.96 | 13.00 | 9.59 | 15.98 | 1.56 | ▁▃▃▆▇▁▄▁▁ |
| 28.5 | 11 | 13.27 | 13.28 | 11.37 | 14.79 | 1.01 | ▂▂▂▇▅▂▅ |
| 30 | 17 | 13.53 | 13.10 | 11.29 | 15.62 | 1.34 | ▂▂▃▇▃▂▂▃▅ |
# Same descriptive summary for 2-month median offspring SL.
datasummary(Factor(TEMPERATURE) ~ MEDIAN_STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m2_df, MEDIAN_STANDARD_LENGTH),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 19 | 20.71 | 20.90 | 18.50 | 22.16 | 0.89 | ▂▇▇▅▇▇▇▂ |
| 28.5 | 11 | 20.47 | 20.43 | 19.38 | 21.61 | 0.66 | ▅▂▂▇▅▂▂▂ |
| 30 | 16 | 20.37 | 20.54 | 18.64 | 21.70 | 0.93 | ▅▂▇▂▇▅▅▅ |
# Same descriptive summary for 2.5-month median offspring SL.
datasummary(Factor(TEMPERATURE) ~ MEDIAN_STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m2.5_df, MEDIAN_STANDARD_LENGTH),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 17 | 24.65 | 24.58 | 20.65 | 28.43 | 1.75 | ▂▂▃▇▇▅▂▂ |
| 28.5 | 13 | 24.17 | 23.91 | 21.82 | 28.23 | 1.83 | ▇▇▅▅▂▂▂ |
| 30 | 18 | 23.86 | 23.80 | 22.37 | 25.50 | 0.88 | ▁▃▃▃▃▇▁▁▃ |
# Candidate random-effect structures for median egg size, to be compared
# against the intercept-only null. The convergence warning on the fullest
# model indicates the random effects are overparameterised for these data.
modelNULL <- glmmTMB(MEDIAN_EGG_SIZE ~ 1,
family=gaussian(),
data =egg_df)
model3 <- glmmTMB(MEDIAN_EGG_SIZE ~ (1|CLUTCH_ORDER),
family=gaussian(),
data = egg_df)
model4 <- glmmTMB(MEDIAN_EGG_SIZE ~ (1|REGION),
family=gaussian(),
data = egg_df)
model5 <- glmmTMB(MEDIAN_EGG_SIZE ~ (1|REGION) + (1|POPULATION),
family=gaussian(),
data = egg_df)
model6 <- glmmTMB(MEDIAN_EGG_SIZE ~ (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = egg_df) ## Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence
## problem; non-positive-definite Hessian matrix. See vignette('troubleshooting')
# Candidate random-effect structures for 1-month median SL (compared by AIC in
# a later chunk). Convergence warnings on the richer models again point to
# overparameterised random effects.
modelNULL <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ 1,
family=gaussian(),
data =m1_df)
model2 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|LEVEL),
family=gaussian(),
data = m1_df)
model3 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER),
family=gaussian(),
data = m1_df)
model4 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION),
family=gaussian(),
data = m1_df)
model5 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m1_df)
model6 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m1_df) ## Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence
## problem; non-positive-definite Hessian matrix. See vignette('troubleshooting')
## Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence
## problem; singular convergence (7). See vignette('troubleshooting'),
## help('diagnose')
# Candidate random-effect structures for 2-month median SL (AIC comparison on
# the next, export-mangled line).
modelNULL <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ 1,
family=gaussian(),
data =m2_df)
model2 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|LEVEL),
family=gaussian(),
data = m2_df)
model3 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER),
family=gaussian(),
data = m2_df)
model4 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION),
family=gaussian(),
data = m2_df)
model5 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2_df)
model6 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2_df)
AIC(modelNULL, model2, model3, model4, model5, model6) modelNULL <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ 1,
family=gaussian(),
data =m2.5_df)
# Candidate random-effect structures for 2.5-month median SL (null model starts
# on the preceding, export-mangled line; AIC comparison follows).
model2 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|LEVEL),
family=gaussian(),
data = m2.5_df)
model3 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER),
family=gaussian(),
data = m2.5_df)
model4 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION),
family=gaussian(),
data = m2.5_df)
model5 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2.5_df) ## Warning in finalizeTMB(TMBStruc, obj, fit, h, data.tmb.old): Model convergence
## problem; singular convergence (7). See vignette('troubleshooting'),
## help('diagnose')
model6 <- glmmTMB(MEDIAN_STANDARD_LENGTH ~ (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2.5_df)
AIC(modelNULL, model2, model3, model4, model5, model6)
For standard length measurements at different time periods, including 1, 2, and 2.5 months, the best model is the simplest model (i.e., model1), where the only random factor present is CLUTCH_NUMBER.
Now that we have figured out which random factors will be included within our generalized linear mixed effects model, we can start to explore different hypotheses by adding in our fixed factors - covariates.
Fixed factors that will be included will be those that are essential to answering the initial research question based on heritability of traits between offspring and parental fish - labelled as MALE and FEMALE in the dataframe, as well as their combined score MIDPOINT, if applicable. TEMPERATURE is also essential to answering the main research question that looks to see if heritability changes at different temperatures.
Our main research hypothesis will be modelled using the formula below:
An alternative research hypothesis we will test will include an interaction with PARENTAL_DAYS_IN_TEMPERATURE to see if heritability was affected by how long adults spent at experimental temperatures. This model may look something like:
Let's start fitting models:
Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.888 0.272 0.304 0.12 0.228 0.816 0.676 0.236 0.208 0.228 0.968 0.34 0.708 0.36 0.116 1 0.236 0.596 0.552 0.3 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.12936, p-value = 0.4111
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 47, p-value = 0.3134
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.0005385317 0.1129377171
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.0212766
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.12936, p-value = 0.4111
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 47, p-value = 0.3134
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.0005385317 0.1129377171
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.0212766
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MEDIAN_EGG_SIZE ~ scale(SL_FEMALE) * TEMPERATURE
## Data: egg_df
##
## AIC BIC logLik deviance df.resid
## -367.6 -354.7 190.8 -381.6 40
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.74e-05
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.0506058 0.0010821 46.77 < 2e-16 ***
## scale(SL_FEMALE) 0.0046635 0.0010739 4.34 1.41e-05 ***
## TEMPERATURE28.5 -0.0026571 0.0015882 -1.67 0.0943 .
## TEMPERATURE30 -0.0059801 0.0015091 -3.96 7.41e-05 ***
## scale(SL_FEMALE):TEMPERATURE28.5 -0.0025129 0.0015091 -1.67 0.0959 .
## scale(SL_FEMALE):TEMPERATURE30 -0.0009045 0.0016248 -0.56 0.5777
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.048484861 0.0527266576 0.0506057592
## scale(SL_FEMALE) 0.002558663 0.0067683685 0.0046635158
## TEMPERATURE28.5 -0.005769969 0.0004557455 -0.0026571120
## TEMPERATURE30 -0.008937973 -0.0030223022 -0.0059801378
## scale(SL_FEMALE):TEMPERATURE28.5 -0.005470735 0.0004449323 -0.0025129016
## scale(SL_FEMALE):TEMPERATURE30 -0.004089068 0.0022800224 -0.0009045229
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.581
# Marginal means of egg size over the observed maternal SL range (0.25 mm grid).
egg.emm <- emmeans(model1a, ~ SL_FEMALE*TEMPERATURE,
at =list(SL_FEMALE=seq(from =min(egg_df$SL_FEMALE), to =max(egg_df$SL_FEMALE), by=.25)))
egg.df <- as.data.frame(egg.emm)
# Observations as partial-effect values: Fit = fixed-effect prediction + residual.
egg.obs <- drop_na(egg_df, SL_FEMALE, MEDIAN_EGG_SIZE) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
# Per-clutch means and 95% t-based CIs of the partial-effect fits.
# NOTE(review): groups with n.sl == 1 give sd = NA and qt(df = 0) = NaN, which
# is the source of the "NaNs produced" warnings in the knitted output.
egg.obs.summarize <- egg.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.sl =mean(Fit, na.rm=TRUE),
mean.sl.male =mean(SL_MALE, na.rm = TRUE),
sd.sl =sd(Fit, na.rm =TRUE),
n.sl = n()) |>
mutate(se.sl = sd.sl / sqrt(n.sl),
lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl,
upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 90 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 89 remaining warnings.
# Partial-effect plot: egg size vs FEMALE parent SL by temperature.
# Fix: the x aesthetic is SL_FEMALE, but the original axis label and title said
# "male" — a copy-paste from the 1-month (male) section. Labels now say female.
ggplot(data = egg.df, aes(x=SL_FEMALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm") +
geom_point(data = egg.obs, aes(x =SL_FEMALE,y =Fit,
color = TEMPERATURE),
size=3) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") +
ylab("Egg size (mm^2)") +
ggtitle("Offspring-female relationship") +
theme_classic()## `geom_smooth()` using formula = 'y ~ x'
#### Slopes
# Per-temperature R^2 of the partial-effect fits regressed on maternal SL;
# row names of the resulting one-column data frame are the temperature levels.
add.df <- split(egg.obs, egg.obs$TEMPERATURE) |>
  map(\(d) lm(Fit ~ SL_FEMALE, data = d)) |>
  map_dbl(\(m) summary(m)$r.squared) |>
  as.data.frame()
add.df <- add.df |>
  mutate(TEMPERATURE = row.names(add.df)) |>
  # NOTE(review): "r.sqaured" is a typo for "r.squared"; kept unchanged in case
  # later chunks reference this column name.
  rename(r.sqaured = names(add.df)[1])
# Per-temperature regression of the marginal means on maternal SL.
# Slope = parent-offspring regression slope; heritability is estimated as
# twice the slope (single-parent regression), truncated at zero.
# Fix: the group label was "1-months" — a copy-paste from the 1-month section —
# which would mislabel these egg-size rows if results are later combined.
df.results.egg <- egg.df |>
group_by(TEMPERATURE) |>
do({
mod = lm(emmean ~ SL_FEMALE, data = .)
data.frame(group = "egg",
Slope = coef(mod)[2],
Heritability = coef(mod)[2]*2)
}) |>
as.data.frame() |>
mutate(Heritability = case_when(Heritability <=0 ~ 0,
TRUE ~ Heritability)) |>
inner_join(add.df, by="TEMPERATURE"); df.results.egg
Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.684 0.212 0.912 0.436 0.48 0.964 0.932 0.128 0.368 0.896 0.988 0.032 0.416 0.032 0.568 0.876 0.372 0.352 0.508 0.36 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.1265, p-value = 0.4261
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.1265, p-value = 0.4261
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MALE) * TEMPERATURE + scale(DENSITY)
## Data: m1_df
##
## AIC BIC logLik deviance df.resid
## 170.7 185.6 -77.3 154.7 40
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.47
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 12.62085 0.31237 40.40 <2e-16 ***
## scale(SL_MALE) 0.76076 0.33843 2.25 0.0246 *
## TEMPERATURE28.5 0.84838 0.48944 1.73 0.0830 .
## TEMPERATURE30 0.97231 0.44764 2.17 0.0298 *
## scale(DENSITY) 0.04827 0.19495 0.25 0.8044
## scale(SL_MALE):TEMPERATURE28.5 0.02267 0.57029 0.04 0.9683
## scale(SL_MALE):TEMPERATURE30 -0.56947 0.44716 -1.27 0.2028
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 12.00861102 13.2330982 12.62085459
## scale(SL_MALE) 0.09744413 1.4240682 0.76075615
## TEMPERATURE28.5 -0.11090685 1.8076756 0.84838436
## TEMPERATURE30 0.09495686 1.8496614 0.97230914
## scale(DENSITY) -0.33381364 0.4303635 0.04827495
## scale(SL_MALE):TEMPERATURE28.5 -1.09509188 1.1404227 0.02266539
## scale(SL_MALE):TEMPERATURE30 -1.44589249 0.3069476 -0.56947246
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.204
# Marginal means of 1-month SL over the observed male SL range (0.25 mm grid).
m1.sl <- emmeans(model1a, ~ SL_MALE*TEMPERATURE,
at =list(SL_MALE=seq(from =min(m1_df$SL_MALE), to =max(m1_df$SL_MALE), by=.25)))
m1.sl.df <- as.data.frame(m1.sl)
# Observations as partial-effect values: Fit = fixed-effect prediction + residual.
m1.sl.obs <- drop_na(m1_df, SL_MALE, MEDIAN_STANDARD_LENGTH) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
# Per-clutch means and 95% t-based CIs of the partial-effect fits.
# NOTE(review): groups with n.sl == 1 give sd = NA and qt(df = 0) = NaN, which
# is the source of the "NaNs produced" warnings in the knitted output.
m1.sl.obs.summarize <- m1.sl.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.sl =mean(Fit, na.rm=TRUE),
mean.sl.male =mean(SL_MALE, na.rm = TRUE),
sd.sl =sd(Fit, na.rm =TRUE),
n.sl = n()) |>
mutate(se.sl = sd.sl / sqrt(n.sl),
lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl,
upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 96 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 95 remaining warnings.
# Partial-effect plot: 1-month offspring SL vs male parent SL by temperature,
# with per-clutch means and 95% CIs overlaid. The smoothing formula is made
# explicit (y ~ x, lm's default here), matching the 2-month plot.
ggplot(data = m1.sl.df, aes(x = SL_MALE, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_pointrange(data = m1.sl.obs.summarize, aes(x = mean.sl.male,
                                                  y = mean.sl,
                                                  ymin = lower.ci.sl,
                                                  ymax = upper.ci.sl,
                                                  color = TEMPERATURE)) +
  scale_color_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
  ylab("OFFSPRING STANDARD LENGTH (mm)") +
  ggtitle("Offspring-male relationship") +
  theme_classic()
## Warning: Removed 20 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 11 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_segment()`).
# Per-temperature R^2 of the partial-effect fits regressed on male SL.
add.df <- split(m1.sl.obs, m1.sl.obs$TEMPERATURE) |>
  map(\(d) lm(Fit ~ SL_MALE, data = d)) |>
  map_dbl(\(m) summary(m)$r.squared) |>
  as.data.frame()
add.df <- add.df |>
  mutate(TEMPERATURE = row.names(add.df)) |>
  # NOTE(review): "r.sqaured" is a typo for "r.squared"; kept unchanged in case
  # later chunks reference this column name.
  rename(r.sqaured = names(add.df)[1])
# Per-temperature regression of the marginal means on male SL for the 1-month
# data; heritability estimated as 2 x slope (single-parent regression),
# truncated at zero (statement closes on the next, export-mangled line).
df.results.1 <- m1.sl.df |>
group_by(TEMPERATURE) |>
do({
mod = lm(emmean ~ SL_MALE, data = .)
data.frame(group = "1-months",
Slope = coef(mod)[2],
Heritability = coef(mod)[2]*2)
}) |>
as.data.frame() |>
mutate(Heritability = case_when(Heritability <=0 ~ 0,
TRUE ~ Heritability)) |>
inner_join(add.df, by="TEMPERATURE"); df.results.1
The null model appears better than the models that we used. Let's explore the data a bit more and see if we can find a reason for this. Let's start by looking at a basic histogram of our data.
There appears to be a left skew within our data. Let's see if this can be better modelled with a Gamma distribution. If not, we can try to incorporate transformations to our response variable. The model validations below could also use some improvement.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.436 0.012 0.18 0.784 0.828 0.972 0.128 0.88 0.592 0.72 0.896 0.74 0.912 0.788 0.42 0.98 0.48 0.3 0.688 0.492 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.094809, p-value = 0.7921
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 47, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07548573
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.094809, p-value = 0.7921
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 47, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07548573
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MALE) * TEMPERATURE + scale(DENSITY)
## Data: m2_df
##
## AIC BIC logLik deviance df.resid
## 129.7 144.5 -56.9 113.7 39
##
##
## Dispersion estimate for gaussian family (sigma^2): 0.658
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 20.66279 0.21809 94.74 <2e-16 ***
## scale(SL_MALE) 0.04010 0.22479 0.18 0.858
## TEMPERATURE28.5 -0.15598 0.34706 -0.45 0.653
## TEMPERATURE30 -0.28519 0.30695 -0.93 0.353
## scale(DENSITY) -0.14427 0.14144 -1.02 0.308
## scale(SL_MALE):TEMPERATURE28.5 -0.16642 0.37518 -0.44 0.657
## scale(SL_MALE):TEMPERATURE30 0.03756 0.28694 0.13 0.896
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 20.2353472 21.0902425 20.66279484
## scale(SL_MALE) -0.4004901 0.4806866 0.04009825
## TEMPERATURE28.5 -0.8362060 0.5242419 -0.15598209
## TEMPERATURE30 -0.8867982 0.3164111 -0.28519359
## scale(DENSITY) -0.4214972 0.1329480 -0.14427459
## scale(SL_MALE):TEMPERATURE28.5 -0.9017489 0.5689105 -0.16641919
## scale(SL_MALE):TEMPERATURE30 -0.5248359 0.5999473 0.03755567
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.060
# Marginal means of 2-month SL over the observed male SL range (0.25 mm grid).
m2.sl <- emmeans(model1a, ~ SL_MALE*TEMPERATURE,
at =list(SL_MALE=seq(from =min(m2_df$SL_MALE), to =max(m2_df$SL_MALE), by=.25)))
m2.sl.df <- as.data.frame(m2.sl)
# Observations as partial-effect values: Fit = fixed-effect prediction + residual.
m2.sl.obs <- drop_na(m2_df, SL_MALE, MEDIAN_STANDARD_LENGTH) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
# Per-clutch means and 95% t-based CIs.
# NOTE(review): unlike the egg and 1-month sections, this summary averages the
# raw MEDIAN_STANDARD_LENGTH rather than the partial-effect Fit — confirm
# whether this difference is intentional.
m2.sl.obs.summarize <- m2.sl.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.sl =mean(MEDIAN_STANDARD_LENGTH, na.rm=TRUE),
mean.sl.male =mean(SL_MALE, na.rm = TRUE),
sd.sl =sd(MEDIAN_STANDARD_LENGTH, na.rm =TRUE),
n.sl = n()) |>
mutate(se.sl = sd.sl / sqrt(n.sl),
lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl,
upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 90 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 89 remaining warnings.
# Partial-effect plot: 2-month offspring SL vs male parent SL by temperature,
# with per-clutch means and 95% CIs overlaid.
ggplot(data = m2.sl.df, aes(x=SL_MALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
geom_pointrange(data = m2.sl.obs.summarize, aes(x =mean.sl.male,
y =mean.sl,
ymin =lower.ci.sl,
ymax =upper.ci.sl,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
theme_classic()## Warning: Removed 19 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 16 rows containing missing values or values outside the scale range
## (`geom_segment()`).
# Per-temperature R^2 of the partial-effect fits regressed on male SL.
add.df <- split(m2.sl.obs, m2.sl.obs$TEMPERATURE) |>
  map(\(d) lm(Fit ~ SL_MALE, data = d)) |>
  map_dbl(\(m) summary(m)$r.squared) |>
  as.data.frame()
add.df <- add.df |>
  mutate(TEMPERATURE = row.names(add.df)) |>
  # NOTE(review): "r.sqaured" is a typo for "r.squared"; kept unchanged in case
  # later chunks reference this column name.
  rename(r.sqaured = names(add.df)[1])
# Per-temperature regression of the marginal means on male SL for the 2-month
# data; heritability estimated as 2 x slope, truncated at zero.
df.results.2 <- m2.sl.df |>
group_by(TEMPERATURE) |>
do({
mod = lm(emmean ~ SL_MALE, data = .)
data.frame(group = "2-months",
Slope = coef(mod)[2],
Heritability = coef(mod)[2]*2)
}) |>
as.data.frame() |>
mutate(Heritability = case_when(Heritability <=0 ~ 0,
TRUE ~ Heritability)) |>
inner_join(add.df, by="TEMPERATURE"); df.results.2## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.988 0.78 0.192 0.732 0.948 0.684 0.784 0.72 0.528 0.924 0.968 0.904 0.552 0.26 0.856 0.78 0.72 0.52 0.884 0.58 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.0975, p-value = 0.7515
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.0975, p-value = 0.7515
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MALE) * TEMPERATURE + scale(DENSITY) +
## scale(as.numeric(AGE_DAYS))
## Data: m2.5_df
##
## AIC BIC logLik deviance df.resid
## 160.7 177.5 -71.4 142.7 39
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.14
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 24.3834 0.3003 81.21 < 2e-16 ***
## scale(SL_MALE) 0.6923 0.2898 2.39 0.01688 *
## TEMPERATURE28.5 -0.5533 0.4320 -1.28 0.20029
## TEMPERATURE30 -0.3569 0.4034 -0.88 0.37632
## scale(DENSITY) -0.9072 0.1689 -5.37 7.82e-08 ***
## scale(as.numeric(AGE_DAYS)) 0.1600 0.1661 0.96 0.33543
## scale(SL_MALE):TEMPERATURE28.5 -1.3627 0.5069 -2.69 0.00718 **
## scale(SL_MALE):TEMPERATURE30 -0.4976 0.3799 -1.31 0.19022
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 23.7949446 24.9719280 24.3834363
## scale(SL_MALE) 0.1243802 1.2602537 0.6923170
## TEMPERATURE28.5 -1.4000509 0.2934420 -0.5533045
## TEMPERATURE30 -1.1475694 0.4337750 -0.3568972
## scale(DENSITY) -1.2381920 -0.5761345 -0.9071633
## scale(as.numeric(AGE_DAYS)) -0.1655686 0.4855803 0.1600058
## scale(SL_MALE):TEMPERATURE28.5 -2.3562160 -0.3691784 -1.3626972
## scale(SL_MALE):TEMPERATURE30 -1.2421032 0.2469264 -0.4975884
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.497
# Partial-effect grid: emmeans predictions of offspring SL across the observed
# range of paternal SL, at each temperature.
m2.5.sl <- emmeans(model1a, ~ SL_MALE * TEMPERATURE,
  at = list(SL_MALE = seq(from = min(m2.5_df$SL_MALE),
                          to = max(m2.5_df$SL_MALE), by = 0.25)))
m2.5.sl.df <- as.data.frame(m2.5.sl)
# Partial residuals: fixed-effect prediction plus response-scale residual.
m2.5.sl.obs <- drop_na(m2.5_df, SL_MALE, MEDIAN_STANDARD_LENGTH) |>
  mutate(Pred = predict(model1a, re.form = NA, type = 'response'),
         Resid = residuals(model1a, type = 'response'),
         Fit = Pred + Resid)
# Clutch-level means of the partial residuals with 95% t confidence intervals.
m2.5.sl.obs.summarize <- m2.5.sl.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.sl = mean(Fit, na.rm = TRUE),
            mean.sl.male = mean(SL_MALE, na.rm = TRUE),
            sd.sl = sd(Fit, na.rm = TRUE),
            n.sl = n()) |>
  # pmax() keeps qt() from being called with 0 df for single-observation
  # clutches (the source of the "NaNs produced" warnings); the CI is still
  # NA for those clutches because se.sl is NA when n.sl == 1.
  mutate(se.sl = sd.sl / sqrt(n.sl),
         lower.ci.sl = mean.sl - qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl,
         upper.ci.sl = mean.sl + qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 96 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 95 remaining warnings.
# Plot model-predicted offspring~father SL regressions with partial residuals.
ggplot(data = m2.5.sl.df, aes(x = SL_MALE, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_point(data = m2.5.sl.obs, aes(x = SL_MALE, y = Fit,
                                     color = TEMPERATURE),
             size = 3) +
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
  ylab("OFFSPRING STANDARD LENGTH (mm)") +
  ggtitle("Offspring-male relationship") +
  theme_classic()
# Per-temperature r-squared of the partial-residual ~ father-SL regression.
# (A missing line break had fused this assignment onto theme_classic() above.)
add.df <- split(m2.5.sl.obs, m2.5.sl.obs$TEMPERATURE) |>
  map(~ lm(Fit ~ SL_MALE, data = .)) |>
  map(summary) |>
  map_dbl("r.squared") |>
  as.data.frame()
add.df <- add.df |>
  mutate(TEMPERATURE = row.names(add.df)) |>
  # NOTE(review): "r.sqaured" is a typo for "r.squared", but the column is
  # carried downstream under this name by inner_join(), so it is kept as-is.
  rename(r.sqaured = names(add.df)[1])
# Father-offspring regression slope per temperature; narrow-sense
# heritability estimated as 2 * slope (sire regression), truncated at zero.
df.results.2.5 <- m2.5.sl.df |>
  group_by(TEMPERATURE) |>
  do({
    mod = lm(emmean ~ SL_MALE, data = .)
    data.frame(group = "2.5-months",
               Slope = coef(mod)[2],
               Heritability = coef(mod)[2] * 2)
  }) |>
  as.data.frame() |>
  mutate(Heritability = case_when(Heritability <= 0 ~ 0,
                                  TRUE ~ Heritability)) |>
  inner_join(add.df, by = "TEMPERATURE"); df.results.2.5
# A missing line break had fused the print above onto this assignment as the
# bogus name `df.results.2.5df.blup`. df.blup is the pooled (all time points,
# 30 degrees only) data set fitted by the BLUP models below.
df.blup <- rbind(m1_df_all, m2_df_all) |>
  select(-c(SAMPLE_NO)) |>
  rbind(select(m2.5_df_all, -c(AGE_DAYS))) |>
  mutate(SL_MALE_GROUP = scale(SL_MALE),
         fSL_MALE_GROUP = factor(scale(SL_MALE))) |>
  drop_na(STANDARD_LENGTH) |>
  filter(TEMPERATURE == "30")
# BLUP model 1: random intercept per sire-size group (fSL_MALE_GROUP), with
# linear AGE and centred/scaled DENSITY as fixed covariates. Its ranef()
# intercepts are extracted further down as the "genotype" BLUPs.
model1a.blup <- glmmTMB(STANDARD_LENGTH ~ 1 + as.numeric(AGE) + scale(DENSITY, scale=TRUE, center=TRUE) + (1|fSL_MALE_GROUP),
family=gaussian(),
data=df.blup)
summary(model1a.blup)## Family: gaussian ( identity )
## Formula:
## STANDARD_LENGTH ~ 1 + as.numeric(AGE) + scale(DENSITY, scale = TRUE,
## center = TRUE) + (1 | fSL_MALE_GROUP)
## Data: df.blup
##
## AIC BIC logLik deviance df.resid
## 6670.1 6696.6 -3330.1 6660.1 1457
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## fSL_MALE_GROUP (Intercept) 0.3807 0.617
## Residual 5.4767 2.340
## Number of obs: 1462, groups: fSL_MALE_GROUP, 11
##
## Dispersion estimate for gaussian family (sigma^2): 5.48
##
## Conditional model:
## Estimate Std. Error z value
## (Intercept) 8.94229 0.25084 35.65
## as.numeric(AGE) 5.02679 0.07821 64.27
## scale(DENSITY, scale = TRUE, center = TRUE) -0.05281 0.07091 -0.74
## Pr(>|z|)
## (Intercept) <2e-16 ***
## as.numeric(AGE) <2e-16 ***
## scale(DENSITY, scale = TRUE, center = TRUE) 0.456
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Warning: 'r.squaredGLMM' now calculates a revised statistic. See the help page.
## Warning in r.squaredGLMM.glmmTMB(model1a.blup): the effects of zero-inflation
## and dispersion model are ignored
## R2m R2c
## [1,] 0.7500111 0.7662577
# BLUP model 2: quadratic AGE effect. BUG FIX: the original wrote
# poly(as.numeric(AGE, 2, raw=TRUE)), placing the degree arguments inside
# as.numeric() (which silently ignores them), so poly() used its default
# degree of 1 -- the printed summary showed a single poly coefficient and an
# AIC/logLik identical to model1a.blup. The parenthesis is corrected so the
# model is actually quadratic in age.
model2a.blup <- glmmTMB(STANDARD_LENGTH ~ 1 + poly(as.numeric(AGE), 2, raw = TRUE) + scale(DENSITY, scale = TRUE, center = TRUE) + (1 | fSL_MALE_GROUP),
                        family = gaussian(),
                        data = df.blup)
summary(model2a.blup)## Family: gaussian ( identity )
## Formula: STANDARD_LENGTH ~ 1 + poly(as.numeric(AGE, 2, raw = TRUE)) +
## scale(DENSITY, scale = TRUE, center = TRUE) + (1 | fSL_MALE_GROUP)
## Data: df.blup
##
## AIC BIC logLik deviance df.resid
## 6670.1 6696.6 -3330.1 6660.1 1457
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## fSL_MALE_GROUP (Intercept) 0.3807 0.617
## Residual 5.4767 2.340
## Number of obs: 1462, groups: fSL_MALE_GROUP, 11
##
## Dispersion estimate for gaussian family (sigma^2): 5.48
##
## Conditional model:
## Estimate Std. Error z value
## (Intercept) 18.96150 0.19705 96.23
## poly(as.numeric(AGE, 2, raw = TRUE)) 159.59028 2.48314 64.27
## scale(DENSITY, scale = TRUE, center = TRUE) -0.05281 0.07091 -0.74
## Pr(>|z|)
## (Intercept) <2e-16 ***
## poly(as.numeric(AGE, 2, raw = TRUE)) <2e-16 ***
## scale(DENSITY, scale = TRUE, center = TRUE) 0.456
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Warning in r.squaredGLMM.glmmTMB(model2a.blup): the effects of zero-inflation
## and dispersion model are ignored
## R2m R2c
## [1,] 0.7500109 0.7662576
# BLUP model 3: adds a random slope for AGE within sire-size group.
# (The printed summary shows the slope variance collapsed to ~1.6e-15,
# so this fit reduces to model1a.blup with 2 extra parameters.)
model3a.blup <- glmmTMB(STANDARD_LENGTH ~ 1 + as.numeric(AGE) + scale(DENSITY, center=TRUE) + (1+as.numeric(AGE)|fSL_MALE_GROUP),
family=gaussian(),
data=df.blup)
summary(model3a.blup)## Family: gaussian ( identity )
## Formula:
## STANDARD_LENGTH ~ 1 + as.numeric(AGE) + scale(DENSITY, center = TRUE) +
## (1 + as.numeric(AGE) | fSL_MALE_GROUP)
## Data: df.blup
##
## AIC BIC logLik deviance df.resid
## 6674.1 6711.1 -3330.1 6660.1 1455
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev. Corr
## fSL_MALE_GROUP (Intercept) 3.807e-01 6.170e-01
## as.numeric(AGE) 1.628e-15 4.034e-08 1.00
## Residual 5.477e+00 2.340e+00
## Number of obs: 1462, groups: fSL_MALE_GROUP, 11
##
## Dispersion estimate for gaussian family (sigma^2): 5.48
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 8.94227 0.25084 35.65 <2e-16 ***
## as.numeric(AGE) 5.02680 0.07821 64.27 <2e-16 ***
## scale(DENSITY, center = TRUE) -0.05281 0.07091 -0.74 0.456
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Warning in r.squaredGLMM.glmmTMB(model3a.blup): the effects of zero-inflation
## and dispersion model are ignored
## R2m R2c
## [1,] 0.750012 0.7662587
# Extract per-group random-intercept BLUPs from the selected model and plot
# them ranked by estimate.
genotype_blups <- ranef(model1a.blup)
# NOTE(review): the original also ran
#   genotype_blups$cond$fSL_MALE_GROUP$male_size <- row.names(genotype_blups)
# but row.names() of the ranef list is NULL, so that assignment was a no-op;
# the sire-size labels are taken from the data frame's own row names below.
blup_df <- genotype_blups$cond$fSL_MALE_GROUP
# seq_len(nrow(...)) replaces the hard-coded 1:11 so the index tracks the
# actual number of sire-size groups.
genotype_index <- as.data.frame(as.factor(seq_len(nrow(blup_df))))
genotype_data <- cbind(genotype_index, blup_df, row.names(blup_df))
colnames(genotype_data) <- c("sample", "BLUP_int", "genotype")
# Order genotypes by BLUP intercept for plotting.
genotype_data$genotype_ordered <- factor(genotype_data$genotype,
  levels = genotype_data$genotype[order(genotype_data$BLUP_int)])
ggplot(genotype_data, aes(genotype_ordered, BLUP_int)) +
  geom_point(aes(group = genotype, colour = genotype), size = 4) +
  ylab("BLUP intercept estimate") +
  geom_hline(yintercept = 0, lty = 2) + theme_classic() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# (Recomputed identically to above; kept for fidelity with the original
# script, which repeated this line before the bar plot.)
genotype_data$genotype_ordered <- factor(genotype_data$genotype,
  levels = genotype_data$genotype[order(genotype_data$BLUP_int)])
# NOTE(review): this overwrites the sire-size labels with row indices before
# the bar plot -- presumably intentional for shorter axis labels; confirm.
genotype_data$genotype <- row.names(genotype_data)
ggplot(genotype_data, aes(genotype_ordered, BLUP_int)) +
  geom_bar(stat = "identity", aes(group = genotype, fill = genotype)) +
  xlab("Genotype (in ranked order of plasticity)") +
  ylab("Plasticity (BLUP slope estimate)") +
  theme_classic() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.884 0.32 0.356 0.188 0.264 0.808 0.756 0.22 0.428 0.224 0.968 0.36 0.648 0.456 0.092 1 0.344 0.564 0.528 0.2 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.10885, p-value = 0.6335
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 47, p-value = 0.3134
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.0005385317 0.1129377171
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.0212766
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.10885, p-value = 0.6335
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 1, observations = 47, p-value = 0.3134
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.0005385317 0.1129377171
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.0212766
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MEDIAN_EGG_SIZE ~ scale(SL_MIDPOINT) * TEMPERATURE
## Data: egg_df
##
## AIC BIC logLik deviance df.resid
## -369.9 -357.0 192.0 -383.9 40
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.66e-05
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.050260 0.001085 46.32 < 2e-16 ***
## scale(SL_MIDPOINT) 0.004576 0.001025 4.47 7.99e-06 ***
## TEMPERATURE28.5 -0.002262 0.001562 -1.45 0.147686
## TEMPERATURE30 -0.005124 0.001516 -3.38 0.000723 ***
## scale(SL_MIDPOINT):TEMPERATURE28.5 -0.001476 0.001595 -0.93 0.354920
## scale(SL_MIDPOINT):TEMPERATURE30 -0.001109 0.001495 -0.74 0.458160
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.048132957 0.0523862168 0.050259587
## scale(SL_MIDPOINT) 0.002567232 0.0065838222 0.004575527
## TEMPERATURE28.5 -0.005324021 0.0008002473 -0.002261887
## TEMPERATURE30 -0.008094070 -0.0021530564 -0.005123563
## scale(SL_MIDPOINT):TEMPERATURE28.5 -0.004602412 0.0016508979 -0.001475757
## scale(SL_MIDPOINT):TEMPERATURE30 -0.004040208 0.0018214638 -0.001109372
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.601
# Partial-effect grid for egg size: emmeans predictions across the observed
# range of parental midpoint SL, at each temperature.
egg.sl <- emmeans(model1a, ~ SL_MIDPOINT*TEMPERATURE,
  at = list(SL_MIDPOINT = seq(from = min(egg_df$SL_MIDPOINT),
                              to = max(egg_df$SL_MIDPOINT), by = 0.25)))
egg.sl.df <- as.data.frame(egg.sl)
# Partial residuals on the response scale.
egg.sl.obs <- drop_na(egg_df, SL_MIDPOINT, MEDIAN_EGG_SIZE) |>
  mutate(Pred = predict(model1a, re.form = NA, type = 'response'),
         Resid = residuals(model1a, type = 'response'),
         Fit = Pred + Resid)
# Clutch-level means with 95% t confidence intervals.
egg.sl.obs.summarize <- egg.sl.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.sl = mean(Fit, na.rm = TRUE),
            mean.sl.female = mean(SL_MIDPOINT, na.rm = TRUE),
            sd.sl = sd(Fit, na.rm = TRUE),
            n.sl = n()) |>
  # pmax() avoids qt() with 0 df for single-observation clutches (the source
  # of the "NaNs produced" warnings); the CI is still NA there since se.sl
  # is NA when n.sl == 1.
  mutate(se.sl = sd.sl / sqrt(n.sl),
         lower.ci.sl = mean.sl - qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl,
         upper.ci.sl = mean.sl + qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 90 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 89 remaining warnings.
# Egg-size partial-effect plot with clutch-level means and 95% CIs.
# formula = y ~ x is now passed explicitly, matching the other stat_smooth
# calls in this file and silencing the `geom_smooth()` message.
ggplot(data = egg.sl.df, aes(x = SL_MIDPOINT, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_pointrange(data = egg.sl.obs.summarize, aes(x = mean.sl.female,
                                                   y = mean.sl,
                                                   ymin = lower.ci.sl,
                                                   ymax = upper.ci.sl,
                                                   color = TEMPERATURE)) +
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") +
  # NOTE(review): the response here is MEDIAN_EGG_SIZE, yet the y label and
  # title mention offspring standard length / "male" -- likely copy-paste
  # from the SL plots; confirm intended labels before publication.
  ylab("OFFSPRING STANDARD LENGTH (mm)") +
  ggtitle("Offspring-male relationship") +
theme_classic()## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 14 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Removed 14 rows containing missing values or values outside the scale range
## (`geom_segment()`).
# Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.732 0.26 0.908 0.328 0.404 0.964 0.928 0.136 0.28 0.896 0.984 0.024 0.476 0.032 0.484 0.88 0.372 0.372 0.456 0.348 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.13767, p-value = 0.3229
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.13767, p-value = 0.3229
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MIDPOINT) * TEMPERATURE + scale(DENSITY)
## Data: m1_df
##
## AIC BIC logLik deviance df.resid
## 172.5 187.5 -78.3 156.5 40
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.53
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 12.72831 0.30035 42.38 <2e-16 ***
## scale(SL_MIDPOINT) 0.64654 0.31643 2.04 0.0410 *
## TEMPERATURE28.5 0.74690 0.49619 1.51 0.1323
## TEMPERATURE30 0.83757 0.43483 1.93 0.0541 .
## scale(DENSITY) 0.02259 0.20137 0.11 0.9107
## scale(SL_MIDPOINT):TEMPERATURE28.5 -0.07417 0.49651 -0.15 0.8812
## scale(SL_MIDPOINT):TEMPERATURE30 -0.44748 0.45455 -0.98 0.3249
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 12.13963002 13.3169916 12.72831083
## scale(SL_MIDPOINT) 0.02635081 1.2667243 0.64653757
## TEMPERATURE28.5 -0.22561197 1.7194193 0.74690367
## TEMPERATURE30 -0.01468738 1.6898215 0.83756708
## scale(DENSITY) -0.37208520 0.4172682 0.02259152
## scale(SL_MIDPOINT):TEMPERATURE28.5 -1.04731009 0.8989629 -0.07417362
## scale(SL_MIDPOINT):TEMPERATURE30 -1.33837535 0.4434223 -0.44747652
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.171
# Restrict the 1-month data to rows with a parental midpoint SL, then build
# the emmeans partial-effect grid across the observed SL_MIDPOINT range.
m1_df <-
  m1_df |>
  drop_na(SL_MIDPOINT)
m1.sl <- emmeans(model1a, ~ SL_MIDPOINT*TEMPERATURE,
  at = list(SL_MIDPOINT = seq(from = min(m1_df$SL_MIDPOINT),
                              to = max(m1_df$SL_MIDPOINT), by = 0.25)))
m1.sl.df <- as.data.frame(m1.sl)
# Partial residuals on the response scale.
m1.sl.obs <- drop_na(m1_df, SL_MIDPOINT, MEDIAN_STANDARD_LENGTH) |>
  mutate(Pred = predict(model1a, re.form = NA, type = 'response'),
         Resid = residuals(model1a, type = 'response'),
         Fit = Pred + Resid)
# Clutch-level means with 95% t confidence intervals.
m1.sl.obs.summarize <- m1.sl.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.sl = mean(Fit, na.rm = TRUE),
            mean.sl.female = mean(SL_MIDPOINT, na.rm = TRUE),
            sd.sl = sd(Fit, na.rm = TRUE),
            n.sl = n()) |>
  # pmax() avoids qt() with 0 df for single-observation clutches (the source
  # of the "NaNs produced" warnings); CI stays NA there via se.sl.
  mutate(se.sl = sd.sl / sqrt(n.sl),
         lower.ci.sl = mean.sl - qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl,
         upper.ci.sl = mean.sl + qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 96 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 95 remaining warnings.
# 1-month partial-effect plot with clutch-level means and 95% CIs.
# formula = y ~ x is now passed explicitly, matching the other stat_smooth
# calls in this file and silencing the `geom_smooth()` message.
ggplot(data = m1.sl.df, aes(x = SL_MIDPOINT, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_pointrange(data = m1.sl.obs.summarize, aes(x = mean.sl.female,
                                                  y = mean.sl,
                                                  ymin = lower.ci.sl,
                                                  ymax = upper.ci.sl,
                                                  color = TEMPERATURE)) +
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") +
  ylab("OFFSPRING STANDARD LENGTH (mm)") +
  ggtitle("Offspring-male relationship") +
theme_classic()## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 20 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 11 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.444 0.012 0.18 0.796 0.82 0.976 0.144 0.9 0.804 0.748 0.892 0.704 0.872 0.772 0.444 0.98 0.46 0.3 0.672 0.56 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.10826, p-value = 0.6404
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 47, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07548573
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.10826, p-value = 0.6404
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0221, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 47, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07548573
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MIDPOINT) * TEMPERATURE + scale(DENSITY)
## Data: m2_df
##
## AIC BIC logLik deviance df.resid
## 129.8 144.6 -56.9 113.8 39
##
##
## Dispersion estimate for gaussian family (sigma^2): 0.66
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 20.66363 0.20543 100.59 <2e-16 ***
## scale(SL_MIDPOINT) 0.05728 0.20403 0.28 0.779
## TEMPERATURE28.5 -0.10924 0.34024 -0.32 0.748
## TEMPERATURE30 -0.31993 0.29111 -1.10 0.272
## scale(DENSITY) -0.12710 0.14169 -0.90 0.370
## scale(SL_MIDPOINT):TEMPERATURE28.5 0.01383 0.32642 0.04 0.966
## scale(SL_MIDPOINT):TEMPERATURE30 -0.11149 0.29280 -0.38 0.703
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 20.2609966 21.0662668 20.66363169
## scale(SL_MIDPOINT) -0.3426036 0.4571724 0.05728442
## TEMPERATURE28.5 -0.7761001 0.5576149 -0.10924260
## TEMPERATURE30 -0.8904839 0.2506308 -0.31992659
## scale(DENSITY) -0.4048125 0.1506106 -0.12710095
## scale(SL_MIDPOINT):TEMPERATURE28.5 -0.6259404 0.6535998 0.01382971
## scale(SL_MIDPOINT):TEMPERATURE30 -0.6853741 0.4623879 -0.11149310
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.057
# Restrict the 2-month data to rows with a parental midpoint SL, then build
# the emmeans partial-effect grid across the observed SL_MIDPOINT range.
m2_df <-
  m2_df |>
  drop_na(SL_MIDPOINT)
m2.sl <- emmeans(model1a, ~ SL_MIDPOINT*TEMPERATURE,
  at = list(SL_MIDPOINT = seq(from = min(m2_df$SL_MIDPOINT),
                              to = max(m2_df$SL_MIDPOINT), by = 0.25)))
m2.sl.df <- as.data.frame(m2.sl)
# Partial residuals on the response scale.
m2.sl.obs <- drop_na(m2_df, SL_MIDPOINT, MEDIAN_STANDARD_LENGTH) |>
  mutate(Pred = predict(model1a, re.form = NA, type = 'response'),
         Resid = residuals(model1a, type = 'response'),
         Fit = Pred + Resid)
# Clutch-level means with 95% t confidence intervals.
m2.sl.obs.summarize <- m2.sl.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.sl = mean(Fit, na.rm = TRUE),
            mean.sl.male = mean(SL_MIDPOINT, na.rm = TRUE),
            sd.sl = sd(Fit, na.rm = TRUE),
            n.sl = n()) |>
  # pmax() avoids qt() with 0 df for single-observation clutches (the source
  # of the "NaNs produced" warnings); CI stays NA there via se.sl.
  mutate(se.sl = sd.sl / sqrt(n.sl),
         lower.ci.sl = mean.sl - qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl,
         upper.ci.sl = mean.sl + qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 90 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 89 remaining warnings.
# 2-month partial-effect plot: model1a regression lines (emmeans grid) with
# clutch-level partial-residual means and 95% CIs, faceted by temperature.
ggplot(data = m2.sl.df, aes(x=SL_MIDPOINT, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
geom_pointrange(data = m2.sl.obs.summarize, aes(x =mean.sl.male,
y =mean.sl,
ymin =lower.ci.sl,
ymax =upper.ci.sl,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
# NOTE(review): x is the parental SL *midpoint* here, but the title says
# "Offspring-male relationship" -- confirm the intended title.
xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
theme_classic()## Warning: Removed 19 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 16 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.976 0.744 0.172 0.78 0.96 0.62 0.768 0.912 0.484 0.912 0.968 0.856 0.604 0.232 0.848 0.764 0.68 0.46 0.844 0.58 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.056667, p-value = 0.9979
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.056667, p-value = 0.9979
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0217, p-value = 0.88
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 48, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00000000 0.07397279
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula:
## MEDIAN_STANDARD_LENGTH ~ scale(SL_MIDPOINT) * TEMPERATURE + scale(DENSITY)
## Data: m2.5_df
##
## AIC BIC logLik deviance df.resid
## 161.3 176.2 -72.6 145.3 40
##
##
## Dispersion estimate for gaussian family (sigma^2): 1.21
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 24.4619 0.2926 83.60 < 2e-16 ***
## scale(SL_MIDPOINT) 0.6666 0.2738 2.43 0.0149 *
## TEMPERATURE28.5 -0.6032 0.4408 -1.37 0.1712
## TEMPERATURE30 -0.4930 0.3940 -1.25 0.2109
## scale(DENSITY) -0.9187 0.1633 -5.63 1.85e-08 ***
## scale(SL_MIDPOINT):TEMPERATURE28.5 -0.8603 0.4403 -1.95 0.0507 .
## scale(SL_MIDPOINT):TEMPERATURE30 -0.5258 0.3911 -1.34 0.1788
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 23.8883879 25.035440279 24.4619141
## scale(SL_MIDPOINT) 0.1299964 1.203237725 0.6666171
## TEMPERATURE28.5 -1.4670448 0.260698033 -0.6031734
## TEMPERATURE30 -1.2651767 0.279263368 -0.4929567
## scale(DENSITY) -1.2387529 -0.598584869 -0.9186689
## scale(SL_MIDPOINT):TEMPERATURE28.5 -1.7233110 0.002721143 -0.8602949
## scale(SL_MIDPOINT):TEMPERATURE30 -1.2922905 0.240695093 -0.5257977
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
##
## Conditional R2: NA
## Marginal R2: 0.469
# Restrict the 2.5-month data to rows with a parental midpoint SL, then build
# the emmeans partial-effect grid across the observed SL_MIDPOINT range.
m2.5_df <- m2.5_df |>
  drop_na(SL_MIDPOINT)
m2.5.sl <- emmeans(model1a, ~ SL_MIDPOINT*TEMPERATURE,
  at = list(SL_MIDPOINT = seq(from = min(m2.5_df$SL_MIDPOINT),
                              to = max(m2.5_df$SL_MIDPOINT), by = 0.25)))
m2.5.sl.df <- as.data.frame(m2.5.sl)
# Partial residuals on the response scale.
m2.5.sl.obs <- drop_na(m2.5_df, SL_MIDPOINT, MEDIAN_STANDARD_LENGTH) |>
  mutate(Pred = predict(model1a, re.form = NA, type = 'response'),
         Resid = residuals(model1a, type = 'response'),
         Fit = Pred + Resid)
# Clutch-level means with 95% t confidence intervals.
m2.5.sl.obs.summarize <- m2.5.sl.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.sl = mean(Fit, na.rm = TRUE),
            mean.sl.male = mean(SL_MIDPOINT, na.rm = TRUE),
            sd.sl = sd(Fit, na.rm = TRUE),
            n.sl = n()) |>
  # pmax() avoids qt() with 0 df for single-observation clutches (the source
  # of the "NaNs produced" warnings); CI stays NA there via se.sl.
  mutate(se.sl = sd.sl / sqrt(n.sl),
         lower.ci.sl = mean.sl - qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl,
         upper.ci.sl = mean.sl + qt(1 - (0.05 / 2), pmax(n.sl - 1, 1)) * se.sl) |>
ungroup()## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
## Warning: There were 96 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.sl = mean.sl - qt(1 - (0.05/2), n.sl - 1) * se.sl`.
## ℹ In group 1: `CLUTCH_NUMBER = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 95 remaining warnings.
# 2.5-month partial-effect plot: model1a regression lines (emmeans grid) with
# clutch-level partial-residual means and 95% CIs, faceted by temperature.
ggplot(data = m2.5.sl.df, aes(x=SL_MIDPOINT, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
geom_pointrange(data = m2.5.sl.obs.summarize, aes(x =mean.sl.male,
y =mean.sl,
ymin =lower.ci.sl,
ymax =upper.ci.sl,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
# NOTE(review): x is the parental SL *midpoint*, but the x label says MALE
# and the title says "Offspring-male" -- confirm intended wording.
xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
theme_classic()## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 13 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 18 rows containing missing values or values outside the scale range
## (`geom_segment()`).